General shadow code cleanup.
Fixed compilation problems when SHADOW_DEBUG is enabled.
Fixed compilation problems when CONFIG_VMX is undefined.
Simplified l1pte_write_fault and l1pte_read_fault.
Name change: spfn => smfn (shadow machine frame numbers).
In general, the terms pfn and gpfn now refer to pages in the
guest's idea of physical frames (which differs for full shadow
guests). mfn always refers to a machine frame number.
One bug fix for check_pagetable():
If we're using writable page tables
along with shadow mode, don't check the currently writable page table
page -- check its snapshot instead.
Signed-off-by: michael.fetterman@cl.cam.ac.uk
unsigned long pa;
#ifdef CONFIG_VMX
- if ( unlikely(d->arch.shadow_mode) )
- pa = ((d->arch.shadow_mode == SHM_full_32) ?
+ if ( unlikely(shadow_mode(d)) )
+ pa = ((shadow_mode(d) == SHM_full_32) ?
pagetable_val(ed->arch.monitor_table) :
pagetable_val(ed->arch.shadow_table));
else
pa = pagetable_val(ed->arch.pagetable);
#else
- if ( unlikely(d->arch.shadow_mode) )
+ if ( unlikely(shadow_mode(d)) )
pa = pagetable_val(ed->arch.shadow_table);
else
pa = pagetable_val(ed->arch.pagetable);
if ( l1_pgentry_val(nl1e) & _PAGE_PRESENT )
{
- /* Differ in mapping (bits 12-31), r/w (bit 1), or presence (bit 0)? */
+ /* Same mapping (bits 12-31), r/w (bit 1), and presence (bit 0)? */
if ( ((l1_pgentry_val(ol1e) ^ l1_pgentry_val(nl1e)) & ~0xffc) == 0 )
return update_l1e(pl1e, ol1e, nl1e);
BUG();
}
- if ( unlikely(d->arch.shadow_mode) &&
+ if ( unlikely(shadow_mode(d)) &&
(get_shadow_status(d, page_to_pfn(page)) & PSH_shadowed) )
{
unshadow_table(page_to_pfn(page), type);
struct pfn_info *page;
int rc = 0, okay = 1, i = 0, cpu = smp_processor_id();
unsigned int cmd, done = 0;
- unsigned long prev_spfn = 0;
+ unsigned long prev_smfn = 0;
l1_pgentry_t *prev_spl1e = 0;
struct exec_domain *ed = current;
struct domain *d = ed->domain;
cleanup_writable_pagetable(d);
+ if ( unlikely(shadow_mode(d)) )
+ check_pagetable(d, ed->arch.pagetable, "pre-mmu"); /* debug */
+
/*
* If we are resuming after preemption, read how much work we have already
* done. This allows us to set the @done output parameter correctly.
okay = mod_l1_entry((l1_pgentry_t *)va,
mk_l1_pgentry(req.val));
- if ( unlikely(d->arch.shadow_mode) && okay &&
+ if ( unlikely(shadow_mode(d)) && okay &&
(get_shadow_status(d, page-frame_table) &
PSH_shadowed) )
{
shadow_l1_normal_pt_update(
- req.ptr, req.val, &prev_spfn, &prev_spl1e);
+ req.ptr, req.val, &prev_smfn, &prev_spl1e);
put_shadow_status(d);
}
mk_l2_pgentry(req.val),
pfn);
- if ( unlikely(d->arch.shadow_mode) && okay &&
+ if ( unlikely(shadow_mode(d)) && okay &&
(get_shadow_status(d, page-frame_table) &
PSH_shadowed) )
{
* If in log-dirty mode, mark the corresponding pseudo-physical
* page as dirty.
*/
- if ( unlikely(d->arch.shadow_mode == SHM_logdirty) &&
+ if ( unlikely(shadow_mode(d) == SHM_logdirty) &&
mark_dirty(d, pfn) )
d->arch.shadow_dirty_block_count++;
if ( unlikely(pdone != NULL) )
__put_user(done + i, pdone);
+ if ( unlikely(shadow_mode(d)) )
+ check_pagetable(d, ed->arch.pagetable, "post-mmu"); /* debug */
+
UNLOCK_BIGLOCK(d);
return rc;
}
mk_l1_pgentry(val))) )
err = -EINVAL;
- if ( unlikely(d->arch.shadow_mode) )
+ if ( unlikely(shadow_mode(d)) )
{
- unsigned long sval;
+ unsigned long sval = 0;
l1pte_propagate_from_guest(d, &val, &sval);
* the PTE in the PT-holding page. We need the machine frame number
* for this.
*/
- if ( d->arch.shadow_mode == SHM_logdirty )
+ if ( shadow_mode(d) == SHM_logdirty )
mark_dirty(d, va_to_l1mfn(page_nr << PAGE_SHIFT));
check_pagetable(d, ed->arch.pagetable, "va"); /* debug */
PTWR_PRINT_WHICH, ptep, pte);
pte &= ~_PAGE_RW;
- if ( unlikely(d->arch.shadow_mode) )
+ if ( unlikely(shadow_mode(d)) )
{
/* Write-protect the p.t. page in the shadow page table. */
l1pte_propagate_from_guest(d, &pte, &spte);
* STEP 3. Reattach the L1 p.t. page into the current address space.
*/
- if ( (which == PTWR_PT_ACTIVE) && likely(!d->arch.shadow_mode) )
+ if ( (which == PTWR_PT_ACTIVE) && likely(!shadow_mode(d)) )
{
pl2e = &linear_l2_table[ptwr_info[cpu].ptinfo[which].l2_idx];
*pl2e = mk_l2_pgentry(l2_pgentry_val(*pl2e) | _PAGE_PRESENT);
/* For safety, disconnect the L1 p.t. page from current space. */
if ( (which == PTWR_PT_ACTIVE) &&
- likely(!current->domain->arch.shadow_mode) )
+ likely(!shadow_mode(current->domain)) )
{
*pl2e = mk_l2_pgentry(l2e & ~_PAGE_PRESENT);
#if 1
/* Free the head page. */
free_shadow_page(
- d, &frame_table[x->spfn_and_flags & PSH_pfn_mask]);
+ d, &frame_table[x->smfn_and_flags & PSH_pfn_mask]);
/* Reinitialise the head node. */
x->pfn = 0;
- x->spfn_and_flags = 0;
+ x->smfn_and_flags = 0;
n = x->next;
x->next = NULL;
{
/* Free the shadow page. */
free_shadow_page(
- d, &frame_table[x->spfn_and_flags & PSH_pfn_mask]);
+ d, &frame_table[x->smfn_and_flags & PSH_pfn_mask]);
/* Re-initialise the chain node. */
x->pfn = 0;
- x->spfn_and_flags = 0;
+ x->smfn_and_flags = 0;
/* Add to the free list. */
n = x->next;
{
unsigned long *p;
int restart = 0;
- struct pfn_info *spage = &frame_table[x->spfn_and_flags & PSH_pfn_mask];
+ struct pfn_info *spage = &frame_table[x->smfn_and_flags & PSH_pfn_mask];
switch ( spage->u.inuse.type_info & PGT_type_mask )
{
/* We clear L2 pages by zeroing the guest entries. */
case PGT_l2_page_table:
p = map_domain_mem((spage - frame_table) << PAGE_SHIFT);
- if (d->arch.shadow_mode == SHM_full_32)
+ if ( shadow_mode(d) == SHM_full_32 )
memset(p, 0, ENTRIES_PER_L2_PAGETABLE * sizeof(*p));
else
memset(p, 0, DOMAIN_ENTRIES_PER_L2_PAGETABLE * sizeof(*p));
void unshadow_table(unsigned long gpfn, unsigned int type)
{
- unsigned long spfn;
+ unsigned long smfn;
struct domain *d = page_get_owner(&frame_table[gpfn]);
SH_VLOG("unshadow_table type=%08x gpfn=%08lx", type, gpfn);
* guests there won't be a race here as this CPU was the one that
* cmpxchg'ed the page to invalid.
*/
- spfn = __shadow_status(d, gpfn) & PSH_pfn_mask;
+ smfn = __shadow_status(d, gpfn) & PSH_pfn_mask;
delete_shadow_status(d, gpfn);
- free_shadow_page(d, &frame_table[spfn]);
+ free_shadow_page(d, &frame_table[smfn]);
}
#ifdef CONFIG_VMX
void vmx_shadow_clear_state(struct domain *d)
{
- SH_VVLOG("vmx_clear_shadow_state: \n");
+ SH_VVLOG("vmx_clear_shadow_state:");
clear_shadow_state(d);
}
#endif
l2_pgentry_t *spl2e = 0;
unsigned long guest_gpfn;
- __get_machine_to_phys(d, guest_gpfn, gpfn);
+ guest_gpfn = __mfn_to_gpfn(d, gpfn);
SH_VVLOG("shadow_l2_table( %08lx )", gpfn);
#ifdef __i386__
/* Install hypervisor and 2x linear p.t. mapings. */
- if ( d->arch.shadow_mode == SHM_full_32 )
+ if ( shadow_mode(d) == SHM_full_32 )
{
+#ifdef CONFIG_VMX
vmx_update_shadow_state(d->exec_domain[0], gpfn, spfn);
+#else
+ panic("Shadow Full 32 not yet implemented without VMX\n");
+#endif
}
else
{
}
#endif
- if ( d->arch.shadow_mode != SHM_full_32 )
+ if ( shadow_mode(d) != SHM_full_32 )
unmap_domain_mem(spl2e);
SH_VLOG("shadow_l2_table( %08lx -> %08lx)", gpfn, spfn);
{
struct exec_domain *ed = current;
struct domain *d = ed->domain;
- unsigned long *gpl1e, *spl1e, gpl2e, spl2e, gl1pfn, sl1pfn=0, sl1ss;
+ unsigned long *gpl1e, *spl1e, gl2e, sl2e, gl1pfn, sl1pfn=0, sl1ss;
struct pfn_info *sl1pfn_info;
int i;
- __guest_get_pl2e(ed, va, &gpl2e);
+ __guest_get_l2e(ed, va, &gl2e);
- gl1pfn = gpl2e >> PAGE_SHIFT;
+ gl1pfn = gl2e >> PAGE_SHIFT;
sl1ss = __shadow_status(d, gl1pfn);
if ( !(sl1ss & PSH_shadowed) )
set_shadow_status(d, gl1pfn, PSH_shadowed | sl1pfn);
- l2pde_general(d, &gpl2e, &spl2e, sl1pfn);
+ l2pde_general(d, &gl2e, &sl2e, sl1pfn);
- __guest_set_pl2e(ed, va, gpl2e);
- __shadow_set_pl2e(ed, va, spl2e);
+ __guest_set_l2e(ed, va, gl2e);
+ __shadow_set_l2e(ed, va, sl2e);
gpl1e = (unsigned long *) &(linear_pg_table[
(va>>L1_PAGETABLE_SHIFT) & ~(ENTRIES_PER_L1_PAGETABLE-1)]);
SH_VVLOG("4b: was shadowed, l2 missing ( %08lx )", sl1pfn);
sl1pfn = sl1ss & PSH_pfn_mask;
- l2pde_general(d, &gpl2e, &spl2e, sl1pfn);
- __guest_set_pl2e(ed, va, gpl2e);
- __shadow_set_pl2e(ed, va, spl2e);
+ l2pde_general(d, &gl2e, &sl2e, sl1pfn);
+ __guest_set_l2e(ed, va, gl2e);
+ __shadow_set_l2e(ed, va, sl2e);
}
}
int shadow_fault(unsigned long va, long error_code)
{
- unsigned long gpte, spte;
+ unsigned long gpte, spte = 0;
struct exec_domain *ed = current;
struct domain *d = ed->domain;
if ( unlikely(__get_user(gpte, (unsigned long *)
&linear_pg_table[va >> PAGE_SHIFT])) )
{
- SH_VVLOG("shadow_fault - EXIT: read gpte faulted" );
+ SH_VVLOG("shadow_fault - EXIT: read gpte faulted2" );
shadow_unlock(d);
return 0;
}
if ( unlikely(!(gpte & _PAGE_PRESENT)) )
{
- SH_VVLOG("shadow_fault - EXIT: gpte not present (%lx)",gpte );
+ SH_VVLOG("shadow_fault - EXIT: gpte not present2 (%lx)",gpte );
shadow_unlock(d);
return 0;
}
void shadow_l1_normal_pt_update(
unsigned long pa, unsigned long gpte,
- unsigned long *prev_spfn_ptr,
+ unsigned long *prev_smfn_ptr,
l1_pgentry_t **prev_spl1e_ptr)
{
- unsigned long spfn, spte, prev_spfn = *prev_spfn_ptr;
+ unsigned long smfn, spte, prev_smfn = *prev_smfn_ptr;
l1_pgentry_t *spl1e, *prev_spl1e = *prev_spl1e_ptr;
/* N.B. To get here, we know the l1 page *must* be shadowed. */
SH_VVLOG("shadow_l1_normal_pt_update pa=%08lx, gpte=%08lx, "
- "prev_spfn=%08lx, prev_spl1e=%p\n",
- pa, gpte, prev_spfn, prev_spl1e);
+ "prev_smfn=%08lx, prev_spl1e=%p",
+ pa, gpte, prev_smfn, prev_spl1e);
- spfn = __shadow_status(current->domain, pa >> PAGE_SHIFT) & PSH_pfn_mask;
+ smfn = __shadow_status(current->domain, pa >> PAGE_SHIFT) & PSH_pfn_mask;
- if ( spfn == prev_spfn )
+ if ( smfn == prev_smfn )
{
spl1e = prev_spl1e;
}
{
if ( prev_spl1e != NULL )
unmap_domain_mem( prev_spl1e );
- spl1e = (l1_pgentry_t *)map_domain_mem(spfn << PAGE_SHIFT);
- *prev_spfn_ptr = spfn;
+ spl1e = (l1_pgentry_t *)map_domain_mem(smfn << PAGE_SHIFT);
+ *prev_smfn_ptr = smfn;
*prev_spl1e_ptr = spl1e;
}
spl1e[(pa & ~PAGE_MASK) / sizeof(l1_pgentry_t)] = mk_l1_pgentry(spte);
}
-void shadow_l2_normal_pt_update(unsigned long pa, unsigned long gpte)
+void shadow_l2_normal_pt_update(unsigned long pa, unsigned long gpde)
{
- unsigned long spfn, spte;
+ unsigned long sl2mfn, spde;
l2_pgentry_t *spl2e;
- unsigned long s_sh;
+ unsigned long sl1mfn;
/* N.B. To get here, we know the l2 page *must* be shadowed. */
- SH_VVLOG("shadow_l2_normal_pt_update pa=%08lx, gpte=%08lx",pa,gpte);
+ SH_VVLOG("shadow_l2_normal_pt_update pa=%08lx, gpde=%08lx",pa,gpde);
- spfn = __shadow_status(current->domain, pa >> PAGE_SHIFT) & PSH_pfn_mask;
+ sl2mfn = __shadow_status(current->domain, pa >> PAGE_SHIFT) & PSH_pfn_mask;
- s_sh = (gpte & _PAGE_PRESENT) ?
- __shadow_status(current->domain, gpte >> PAGE_SHIFT) : 0;
+ sl1mfn = (gpde & _PAGE_PRESENT) ?
+ __shadow_status(current->domain, gpde >> PAGE_SHIFT) : 0;
/* XXXX Should mark guest pte as DIRTY and ACCESSED too! */
- l2pde_general(current->domain, &gpte, &spte, s_sh);
- spl2e = (l2_pgentry_t *)map_domain_mem(spfn << PAGE_SHIFT);
- spl2e[(pa & ~PAGE_MASK) / sizeof(l2_pgentry_t)] = mk_l2_pgentry(spte);
+ l2pde_general(current->domain, &gpde, &spde, sl1mfn);
+ spl2e = (l2_pgentry_t *)map_domain_mem(sl2mfn << PAGE_SHIFT);
+ spl2e[(pa & ~PAGE_MASK) / sizeof(l2_pgentry_t)] = mk_l2_pgentry(spde);
unmap_domain_mem(spl2e);
}
#if SHADOW_DEBUG
+// BUG: these are not SMP safe...
static int sh_l2_present;
static int sh_l1_present;
+static int errors;
char * sh_check_name;
-#define FAIL(_f, _a...) \
- do { \
- printk("XXX %s-FAIL (%d,%d)" _f " g=%08lx s=%08lx\n", \
- sh_check_name, level, i, ## _a , gpte, spte); \
- BUG(); \
+#define virt_to_phys2(adr) ({ \
+ unsigned long _a = (unsigned long)(adr); \
+ unsigned long _pte = l1_pgentry_val( \
+ shadow_linear_pg_table[_a >> PAGE_SHIFT]); \
+ unsigned long _pa = _pte & PAGE_MASK; \
+ _pa | (_a & ~PAGE_MASK); \
+})
+
+#define FAIL(_f, _a...) \
+ do { \
+ printk("XXX %s-FAIL (%d,%d)" _f " g=%08lx s=%08lx &g=%08lx &s=%08lx" \
+ " pa(&g)=%08lx pa(&s)=%08lx\n", \
+ sh_check_name, level, i, ## _a , gpte, spte, pgpte, pspte, \
+ virt_to_phys2(pgpte), virt_to_phys2(pspte)); \
+ errors++; \
} while ( 0 )
static int check_pte(
- struct domain *d, unsigned long gpte, unsigned long spte,
+ struct domain *d, unsigned long *pgpte, unsigned long *pspte,
int level, int i)
{
- unsigned long mask, gpfn, spfn;
- unsigned long guest_gpfn;
+ unsigned gpte = *pgpte;
+ unsigned spte = *pspte;
+ unsigned long mask, gpfn, smfn;
if ( (spte == 0) || (spte == 0xdeadface) || (spte == 0x00000E00) )
return 1; /* always safe */
if ( !(gpte & _PAGE_PRESENT) )
FAIL("Guest not present yet shadow is");
- mask = ~(_PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_RW|0xFFFFF000);
+ mask = ~(_PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_RW|PAGE_MASK);
if ( (spte & mask) != (gpte & mask) )
FAIL("Corrupt?");
if ( (spte & _PAGE_RW ) && !((gpte & _PAGE_RW) && (gpte & _PAGE_DIRTY)) )
FAIL("RW2 coherence");
- spfn = spte >> PAGE_SHIFT;
+ smfn = spte >> PAGE_SHIFT;
gpfn = gpte >> PAGE_SHIFT;
- if ( gpfn == spfn )
+ if ( gpfn == smfn )
{
if ( level > 1 )
FAIL("Linear map ???"); /* XXX this will fail on BSD */
if ( level < 2 )
FAIL("Shadow in L1 entry?");
- if (d->arch.shadow_mode == SHM_full_32) {
-
- guest_gpfn = phys_to_machine_mapping(gpfn);
-
- if ( __shadow_status(d, guest_gpfn) != (PSH_shadowed | spfn) )
- FAIL("spfn problem g.sf=%08lx",
- __shadow_status(d, guest_gpfn) );
-
- } else {
- if ( __shadow_status(d, gpfn) != (PSH_shadowed | spfn) )
- FAIL("spfn problem g.sf=%08lx",
- __shadow_status(d, gpfn) );
- }
-
+ if ( __shadow_status(d, gpfn) != (PSH_shadowed | smfn) )
+ FAIL("smfn problem g.sf=%08lx",
+ __shadow_status(d, gpfn) );
}
return 1;
static int check_l1_table(
- struct domain *d, unsigned long va,
- unsigned long g2, unsigned long s2)
+ struct domain *d,
+ unsigned long g2mfn, unsigned long s2mfn)
{
int i;
unsigned long *gpl1e, *spl1e;
- gpl1e = map_domain_mem(g2 << PAGE_SHIFT);
- spl1e = map_domain_mem(s2 << PAGE_SHIFT);
+ gpl1e = map_domain_mem(g2mfn << PAGE_SHIFT);
+ spl1e = map_domain_mem(s2mfn << PAGE_SHIFT);
for ( i = 0; i < ENTRIES_PER_L1_PAGETABLE; i++ )
- check_pte(d, gpl1e[i], spl1e[i], 1, i);
+ check_pte(d, &gpl1e[i], &spl1e[i], 1, i);
unmap_domain_mem(spl1e);
unmap_domain_mem(gpl1e);
#define FAILPT(_f, _a...) \
do { \
printk("XXX FAIL %s-PT" _f "\n", s, ## _a ); \
- BUG(); \
+ errors++; \
} while ( 0 )
-int check_pagetable(struct domain *d, pagetable_t pt, char *s)
+void check_pagetable(struct domain *d, pagetable_t pt, char *s)
{
unsigned long gptbase = pagetable_val(pt);
- unsigned long gpfn, spfn;
+ unsigned long ptbase_pfn, smfn, ss;
unsigned long i;
l2_pgentry_t *gpl2e, *spl2e;
- unsigned long host_gpfn = 0;
+ unsigned long ptbase_mfn = 0;
+ int cpu = current->processor;
+ errors = 0;
sh_check_name = s;
SH_VVLOG("%s-PT Audit", s);
sh_l2_present = sh_l1_present = 0;
- gpfn = gptbase >> PAGE_SHIFT;
+ ptbase_pfn = gptbase >> PAGE_SHIFT;
+ ptbase_mfn = __gpfn_to_mfn(d, ptbase_pfn);
- __get_phys_to_machine(d, host_gpfn, gpfn);
+ ss = __shadow_status(d, ptbase_pfn);
- if ( ! (__shadow_status(d, gpfn) & PSH_shadowed) )
+ if ( ! (ss & PSH_shadowed) )
{
printk("%s-PT %08lx not shadowed\n", s, gptbase);
- if( __shadow_status(d, gpfn) != 0 ) BUG();
- return 0;
+ if ( ss != 0 )
+ BUG();
+ return;
}
- spfn = __shadow_status(d, gpfn) & PSH_pfn_mask;
+ smfn = ss & PSH_pfn_mask;
- if ( ! __shadow_status(d, gpfn) == (PSH_shadowed | spfn) )
- FAILPT("ptbase shadow inconsistent1");
+ if ( ss != (PSH_shadowed | smfn) )
+ FAILPT("ptbase shadow inconsistent1");
- if (d->arch.shadow_mode == SHM_full_32)
- {
- host_gpfn = phys_to_machine_mapping(gpfn);
- gpl2e = (l2_pgentry_t *) map_domain_mem( host_gpfn << PAGE_SHIFT );
-
- } else
- gpl2e = (l2_pgentry_t *) map_domain_mem( gpfn << PAGE_SHIFT );
-
- spl2e = (l2_pgentry_t *) map_domain_mem( spfn << PAGE_SHIFT );
+ gpl2e = (l2_pgentry_t *) map_domain_mem( ptbase_mfn << PAGE_SHIFT );
+ spl2e = (l2_pgentry_t *) map_domain_mem( smfn << PAGE_SHIFT );
if ( memcmp(&spl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
&gpl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
if ( (l2_pgentry_val(spl2e[SH_LINEAR_PT_VIRT_START >>
L2_PAGETABLE_SHIFT]) !=
- ((spfn << PAGE_SHIFT) | __PAGE_HYPERVISOR)) )
+ ((smfn << PAGE_SHIFT) | __PAGE_HYPERVISOR)) )
FAILPT("hypervisor shadow linear map inconsistent %08lx %08lx",
l2_pgentry_val(spl2e[SH_LINEAR_PT_VIRT_START >>
L2_PAGETABLE_SHIFT]),
- (spfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
+ (smfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
- if (d->arch.shadow_mode != SHM_full_32) {
+ if ( shadow_mode(d) != SHM_full_32 ) {
+ // BUG: this shouldn't be using exec_domain[0] here...
if ( (l2_pgentry_val(spl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT]) !=
- ((__pa(page_get_owner(&frame_table[gpfn])->arch.mm_perdomain_pt) |
+ ((__pa(page_get_owner(&frame_table[ptbase_pfn])->arch.mm_perdomain_pt) |
__PAGE_HYPERVISOR))) )
FAILPT("hypervisor per-domain map inconsistent");
}
/* Check the whole L2. */
for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
- check_pte(d, l2_pgentry_val(gpl2e[i]), l2_pgentry_val(spl2e[i]), 2, i);
+ check_pte(d, &l2_pgentry_val(gpl2e[i]), &l2_pgentry_val(spl2e[i]), 2, i);
/* Go back and recurse. */
for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
{
+ unsigned long gl1pfn = l2_pgentry_val(gpl2e[i]) >> PAGE_SHIFT;
+ unsigned long gl1mfn = __gpfn_to_mfn(d, gl1pfn);
+ unsigned long sl1mfn = l2_pgentry_val(spl2e[i]) >> PAGE_SHIFT;
+
if ( l2_pgentry_val(spl2e[i]) != 0 )
- check_l1_table(
- d, i << L2_PAGETABLE_SHIFT,
- l2_pgentry_val(gpl2e[i]) >> PAGE_SHIFT,
- l2_pgentry_val(spl2e[i]) >> PAGE_SHIFT);
+ {
+ // First check to see if this guest page is currently the active
+ // PTWR page. If so, then we compare the (old) cached copy of the
+ // guest page to the shadow, and not the currently writable (and
+ // thus potentially out-of-sync) guest page.
+ //
+ if ( ptwr_info[cpu].ptinfo[PTWR_PT_ACTIVE].l1va &&
+ (i == ptwr_info[cpu].ptinfo[PTWR_PT_ACTIVE].l2_idx) &&
+ likely(VM_ASSIST(d, VMASST_TYPE_writable_pagetables)) )
+ {
+ gl1mfn = (__pa(ptwr_info[cpu].ptinfo[PTWR_PT_ACTIVE].page) >>
+ PAGE_SHIFT);
+ }
+
+ check_l1_table(d, gl1mfn, sl1mfn);
+ }
}
unmap_domain_mem(spl2e);
unmap_domain_mem(gpl2e);
- SH_VVLOG("PT verified : l2_present = %d, l1_present = %d\n",
+ SH_VVLOG("PT verified : l2_present = %d, l1_present = %d",
sh_l2_present, sh_l1_present);
- return 1;
+ if ( errors )
+ BUG();
+
+ return;
}
-#endif
+#endif // SHADOW_DEBUG
perfc_incrc(page_faults);
+#if 0
+ printk("do_page_fault(addr=0x%08lx, error_code=%d)\n",
+ addr, regs->error_code);
+ show_registers(regs);
+#endif
+
if ( likely(VM_ASSIST(d, VMASST_TYPE_writable_pagetables)) )
{
LOCK_BIGLOCK(d);
#include <asm/vmx_vmcs.h>
#include <public/io/ioreq.h>
+#ifdef CONFIG_VMX
+
int vmcs_size;
unsigned int opt_vmx_debug_level;
/*
* Set up guest page directory cache to make linear_pt_table[] work.
*/
- __guest_get_pl2e(ed, va, &gpde);
+ __guest_get_l2e(ed, va, &gpde);
if (!(gpde & _PAGE_PRESENT))
return 0;
unsigned long gpde, gpte, pfn, index;
struct exec_domain *ed = current;
- __guest_get_pl2e(ed, gva, &gpde);
+ __guest_get_l2e(ed, gva, &gpde);
index = (gva >> L2_PAGETABLE_SHIFT);
pfn = phys_to_machine_mapping(gpde >> PAGE_SHIFT);
local_irq_disable();
asm volatile("movl %0,%%cr2": :"r" (d->arch.arch_vmx.cpu_cr2));
}
+
+#endif /* CONFIG_VMX */
#include <public/io/ioreq.h>
#include <asm/vmx_platform.h>
+#ifdef CONFIG_VMX
+
extern long do_block();
#if defined (__i386__)
if (!test_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags))
vmx_intr_assist(d);
}
+
+#endif /* CONFIG_VMX */
#include <xen/sched.h>
#include <asm/current.h>
+#ifdef CONFIG_VMX
+
#define DECODE_success 1
#define DECODE_failure 0
domain_crash();
}
+#endif /* CONFIG_VMX */
#include <public/io/ioreq.h>
#include <asm/domain_page.h>
+#ifdef CONFIG_VMX
+
struct vmcs_struct *alloc_vmcs(void)
{
struct vmcs_struct *vmcs;
BUG();
}
+#endif /* CONFIG_VMX */
#include <xen/event.h>
#include <xen/elf.h>
#include <xen/kernel.h>
+#include <asm/shadow.h>
/* No ring-3 access in initial page tables. */
#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
new_thread(ed, dsi.v_kernentry, vstack_end, vstartinfo_start);
-#if 0 /* XXXXX DO NOT CHECK IN ENABLED !!! (but useful for testing so leave) */
- shadow_lock(&d->mm);
- shadow_mode_enable(d, SHM_test);
- shadow_unlock(&d->mm);
+#ifndef NDEBUG
+ if (0) /* XXXXX DO NOT CHECK IN ENABLED !!! (but useful for testing so leave) */
+ {
+ shadow_lock(d);
+ shadow_mode_enable(d, SHM_test);
+ shadow_unlock(d);
+ }
#endif
return 0;
#include <xen/console.h>
#include <xen/mm.h>
#include <xen/irq.h>
+#include <asm/flushtlb.h>
static int kstack_depth_to_print = 8*20;
regs->esi, regs->edi, regs->ebp, esp);
printk("ds: %04x es: %04x fs: %04x gs: %04x ss: %04x\n",
ds, es, fs, gs, ss);
+ printk("cr3: %08lx\n", read_cr3());
show_stack((unsigned long *)®s->esp);
}
#include <xen/perfc.h>
#include <asm/processor.h>
+#ifdef CONFIG_VMX
+#include <asm/domain_page.h>
+#endif
+
/* Shadow PT flag bits in pfn_info */
#define PSH_shadowed (1<<31) /* page has a shadow. PFN points to shadow */
#define PSH_pfn_mask ((1<<21)-1)
extern void shadow_l1_normal_pt_update(
unsigned long pa, unsigned long gpte,
unsigned long *prev_spfn_ptr, l1_pgentry_t **prev_spl1e_ptr);
-extern void shadow_l2_normal_pt_update(unsigned long pa, unsigned long gpte);
+extern void shadow_l2_normal_pt_update(unsigned long pa, unsigned long gpde);
extern void unshadow_table(unsigned long gpfn, unsigned int type);
extern int shadow_mode_enable(struct domain *p, unsigned int mode);
extern void vmx_shadow_invlpg(struct domain *, unsigned long);
#endif
-#define __get_machine_to_phys(_d, guest_gpfn, gpfn) \
- if ((_d)->arch.shadow_mode == SHM_full_32) \
- (guest_gpfn) = machine_to_phys_mapping[(gpfn)]; \
- else \
- (guest_gpfn) = (gpfn);
+#define __mfn_to_gpfn(_d, mfn) \
+ ( (shadow_mode(_d) == SHM_full_32) \
+ ? machine_to_phys_mapping[(mfn)] \
+ : (mfn) )
-#define __get_phys_to_machine(_d, host_gpfn, gpfn) \
- if ((_d)->arch.shadow_mode == SHM_full_32) \
- (host_gpfn) = phys_to_machine_mapping(gpfn); \
- else \
- (host_gpfn) = (gpfn);
+#define __gpfn_to_mfn(_d, gpfn) \
+ ( (shadow_mode(_d) == SHM_full_32) \
+ ? phys_to_machine_mapping(gpfn) \
+ : (gpfn) )
extern void __shadow_mode_disable(struct domain *d);
static inline void shadow_mode_disable(struct domain *d)
struct domain *d, unsigned long gpfn);
static inline void shadow_invalidate(struct exec_domain *ed) {
- if ( ed->domain->arch.shadow_mode != SHM_full_32 )
+ if ( shadow_mode(ed->domain) != SHM_full_32 )
BUG();
memset(ed->arch.shadow_vtable, 0, PAGE_SIZE);
}
#define SHADOW_DEBUG 1
+#define SHADOW_VERBOSE_DEBUG 0
#define SHADOW_HASH_DEBUG 1
struct shadow_status {
unsigned long pfn; /* Guest pfn. */
- unsigned long spfn_and_flags; /* Shadow pfn plus flags. */
+ unsigned long smfn_and_flags; /* Shadow mfn plus flags. */
struct shadow_status *next; /* Pull-to-front list. */
};
#define shadow_ht_buckets 256
#ifdef VERBOSE
-#define SH_LOG(_f, _a...) \
-printk("DOM%u: (file=shadow.c, line=%d) " _f "\n", \
- current->domain->id , __LINE__ , ## _a )
+#define SH_LOG(_f, _a...) \
+printk("DOM%uP%u: (file=shadow.c, line=%d) " _f "\n", \
+ current->domain->id , current->processor, __LINE__ , ## _a )
#else
#define SH_LOG(_f, _a...)
#endif
#if SHADOW_DEBUG
-#define SH_VLOG(_f, _a...) \
- printk("DOM%u: (file=shadow.c, line=%d) " _f "\n", \
- current->domain->id , __LINE__ , ## _a )
+#define SH_VLOG(_f, _a...) \
+ printk("DOM%uP%u: (file=shadow.c, line=%d) " _f "\n", \
+ current->domain->id, current->processor, __LINE__ , ## _a )
#else
#define SH_VLOG(_f, _a...)
#endif
-#if 0
-#define SH_VVLOG(_f, _a...) \
- printk("DOM%u: (file=shadow.c, line=%d) " _f "\n", \
- current->domain->id , __LINE__ , ## _a )
+#if SHADOW_VERBOSE_DEBUG
+#define SH_VVLOG(_f, _a...) \
+ printk("DOM%uP%u: (file=shadow.c, line=%d) " _f "\n", \
+ current->domain->id, current->processor, __LINE__ , ## _a )
#else
#define SH_VVLOG(_f, _a...)
#endif
-static inline void __shadow_get_pl2e(
+// BUG: mafetter: this assumes ed == current, so why pass ed?
+static inline void __shadow_get_l2e(
struct exec_domain *ed, unsigned long va, unsigned long *sl2e)
{
- *sl2e = (ed->domain->arch.shadow_mode == SHM_full_32) ?
- l2_pgentry_val(ed->arch.shadow_vtable[l2_table_offset(va)]) :
- l2_pgentry_val(linear_l2_table[l2_table_offset(va)]);
+ if ( shadow_mode(ed->domain) == SHM_full_32 ) {
+ *sl2e = l2_pgentry_val(ed->arch.shadow_vtable[l2_table_offset(va)]);
+ }
+ else if ( shadow_mode(ed->domain) ) {
+ *sl2e = l2_pgentry_val(shadow_linear_l2_table[l2_table_offset(va)]);
+ }
+ else
+ *sl2e = l2_pgentry_val(linear_l2_table[l2_table_offset(va)]);
}
-static inline void __shadow_set_pl2e(
+static inline void __shadow_set_l2e(
struct exec_domain *ed, unsigned long va, unsigned long value)
{
- if ( ed->domain->arch.shadow_mode == SHM_full_32 )
+ if ( shadow_mode(ed->domain) == SHM_full_32 ) {
ed->arch.shadow_vtable[l2_table_offset(va)] = mk_l2_pgentry(value);
+ }
+ else if ( shadow_mode(ed->domain) ) {
+ shadow_linear_l2_table[l2_table_offset(va)] = mk_l2_pgentry(value);
+ }
else
linear_l2_table[l2_table_offset(va)] = mk_l2_pgentry(value);
}
-static inline void __guest_get_pl2e(
+static inline void __guest_get_l2e(
struct exec_domain *ed, unsigned long va, unsigned long *l2e)
{
- *l2e = (ed->domain->arch.shadow_mode == SHM_full_32) ?
+ *l2e = ( shadow_mode(ed->domain) == SHM_full_32) ?
l2_pgentry_val(ed->arch.vpagetable[l2_table_offset(va)]) :
l2_pgentry_val(linear_l2_table[l2_table_offset(va)]);
}
-static inline void __guest_set_pl2e(
+static inline void __guest_set_l2e(
struct exec_domain *ed, unsigned long va, unsigned long value)
{
- if ( ed->domain->arch.shadow_mode == SHM_full_32 )
+ if ( shadow_mode(ed->domain) == SHM_full_32 )
{
unsigned long pfn;
{
unsigned long gpte = *gpte_p;
unsigned long spte = *spte_p;
+ unsigned long pfn = gpte >> PAGE_SHIFT;
+ unsigned long mfn = __gpfn_to_mfn(d, pfn);
ASSERT(gpte & _PAGE_RW);
gpte |= _PAGE_DIRTY | _PAGE_ACCESSED;
- switch ( d->arch.shadow_mode )
- {
- case SHM_test:
- spte = gpte | _PAGE_RW;
- break;
+ if ( shadow_mode(d) == SHM_logdirty )
+ __mark_dirty(d, pfn);
- case SHM_logdirty:
- spte = gpte | _PAGE_RW;
- __mark_dirty(d, gpte >> PAGE_SHIFT);
+ spte = (mfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
- case SHM_full_32:
- {
- unsigned long host_pfn, host_gpte;
-
- host_pfn = phys_to_machine_mapping(gpte >> PAGE_SHIFT);
- host_gpte = (host_pfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
- spte = host_gpte | _PAGE_RW;
- }
- break;
- }
-
- SH_VVLOG("updating spte=%lx gpte=%lx", spte, gpte);
+ SH_VVLOG("l1pte_write_fault: updating spte=0x%08lx gpte=0x%08lx", spte, gpte);
*gpte_p = gpte;
*spte_p = spte;
}
{
unsigned long gpte = *gpte_p;
unsigned long spte = *spte_p;
+ unsigned long pfn = gpte >> PAGE_SHIFT;
+ unsigned long mfn = __gpfn_to_mfn(d, pfn);
gpte |= _PAGE_ACCESSED;
+ spte = (mfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
- switch ( d->arch.shadow_mode )
- {
- case SHM_test:
- spte = (gpte & _PAGE_DIRTY) ? gpte : (gpte & ~_PAGE_RW);
- break;
-
- case SHM_logdirty:
- spte = gpte & ~_PAGE_RW;
- break;
-
- case SHM_full_32:
- {
- unsigned long host_pfn, host_gpte;
-
- host_pfn = phys_to_machine_mapping(gpte >> PAGE_SHIFT);
- host_gpte = (host_pfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
- spte = (host_gpte & _PAGE_DIRTY) ? host_gpte : (host_gpte & ~_PAGE_RW);
- }
- break;
-
- }
+ if ( (shadow_mode(d) == SHM_logdirty) || ! (gpte & _PAGE_DIRTY) )
+ spte &= ~_PAGE_RW;
+ SH_VVLOG("l1pte_read_fault: updating spte=0x%08lx gpte=0x%08lx", spte, gpte);
*gpte_p = gpte;
*spte_p = spte;
}
unsigned long gpte = *gpte_p;
unsigned long spte = *spte_p;
unsigned long host_pfn, host_gpte;
+#if SHADOW_VERBOSE_DEBUG
+ unsigned long old_spte = spte;
+#endif
- switch ( d->arch.shadow_mode )
+ switch ( shadow_mode(d) )
{
case SHM_test:
spte = 0;
break;
}
+#if SHADOW_VERBOSE_DEBUG
+ if ( old_spte || spte || gpte )
+ SH_VVLOG("l1pte_propagate_from_guest: gpte=0x%08lx, old spte=0x%08lx, new spte=0x%08lx ", gpte, old_spte, spte);
+#endif
+
*gpte_p = gpte;
*spte_p = spte;
}
struct domain *d,
unsigned long *gpde_p,
unsigned long *spde_p,
- unsigned long sl1pfn)
+ unsigned long sl1mfn)
{
unsigned long gpde = *gpde_p;
unsigned long spde = *spde_p;
spde = 0;
- if ( sl1pfn != 0 )
+ if ( sl1mfn != 0 )
{
- spde = (gpde & ~PAGE_MASK) | (sl1pfn << PAGE_SHIFT) |
+ spde = (gpde & ~PAGE_MASK) | (sl1mfn << PAGE_SHIFT) |
_PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY;
gpde |= _PAGE_ACCESSED | _PAGE_DIRTY;
/* Detect linear p.t. mappings and write-protect them. */
- if ( (frame_table[sl1pfn].u.inuse.type_info & PGT_type_mask) ==
+ if ( (frame_table[sl1mfn].u.inuse.type_info & PGT_type_mask) ==
PGT_l2_page_table )
{
- if ( d->arch.shadow_mode != SHM_full_32 )
+ if ( shadow_mode(d) != SHM_full_32 )
spde = gpde & ~_PAGE_RW;
}
for ( j = 0; j < shadow_ht_buckets; j++ )
{
a = &d->arch.shadow_ht[j];
- if ( a->pfn ) { live++; ASSERT(a->spfn_and_flags & PSH_pfn_mask); }
+ if ( a->pfn ) { live++; ASSERT(a->smfn_and_flags & PSH_pfn_mask); }
ASSERT(a->pfn < 0x00100000UL);
a = a->next;
while ( a && (live < 9999) )
{
live++;
- if ( (a->pfn == 0) || (a->spfn_and_flags == 0) )
+ if ( (a->pfn == 0) || (a->smfn_and_flags == 0) )
{
printk("XXX live=%d pfn=%08lx sp=%08lx next=%p\n",
- live, a->pfn, a->spfn_and_flags, a->next);
+ live, a->pfn, a->smfn_and_flags, a->next);
BUG();
}
ASSERT(a->pfn < 0x00100000UL);
- ASSERT(a->spfn_and_flags & PSH_pfn_mask);
+ ASSERT(a->smfn_and_flags & PSH_pfn_mask);
a = a->next;
}
ASSERT(live < 9999);
}
+/*
+ * N.B. This takes a guest pfn (i.e. a pfn in the guest's namespace,
+ * which, depending on full shadow mode, may or may not equal
+ * its mfn).
+ * The shadow status it returns is a mfn.
+ */
static inline unsigned long __shadow_status(
struct domain *d, unsigned int gpfn)
{
x = head = hash_bucket(d, gpfn);
p = NULL;
- SH_VVLOG("lookup gpfn=%08x bucket=%p", gpfn, x);
+ //SH_VVLOG("lookup gpfn=%08x bucket=%p", gpfn, x);
shadow_audit(d, 0);
do
/* Swap 'x' contents with head contents. */
SWAP(head->pfn, x->pfn);
- SWAP(head->spfn_and_flags, x->spfn_and_flags);
+ SWAP(head->smfn_and_flags, x->smfn_and_flags);
}
- return head->spfn_and_flags;
+ SH_VVLOG("lookup gpfn=%08lx => status=%08lx",
+ gpfn, head->smfn_and_flags);
+ return head->smfn_and_flags;
}
p = x;
}
while ( x != NULL );
+ SH_VVLOG("lookup gpfn=%08lx => status=0", gpfn);
return 0;
}
{
unsigned long res;
- ASSERT(d->arch.shadow_mode);
+ ASSERT(shadow_mode(d));
/*
* If we get here we know that some sort of update has happened to the
shadow_lock(d);
- if ( d->arch.shadow_mode == SHM_logdirty )
+ if ( shadow_mode(d) == SHM_logdirty )
__mark_dirty(d, gpfn);
if ( !(res = __shadow_status(d, gpfn)) )
{
/* Overwrite head with contents of following node. */
head->pfn = n->pfn;
- head->spfn_and_flags = n->spfn_and_flags;
+ head->smfn_and_flags = n->smfn_and_flags;
/* Delete following node. */
head->next = n->next;
/* Add deleted node to the free list. */
n->pfn = 0;
- n->spfn_and_flags = 0;
+ n->smfn_and_flags = 0;
n->next = d->arch.shadow_ht_free;
d->arch.shadow_ht_free = n;
}
{
/* This bucket is now empty. Initialise the head node. */
head->pfn = 0;
- head->spfn_and_flags = 0;
+ head->smfn_and_flags = 0;
}
goto found;
/* Add deleted node to the free list. */
x->pfn = 0;
- x->spfn_and_flags = 0;
+ x->smfn_and_flags = 0;
x->next = d->arch.shadow_ht_free;
d->arch.shadow_ht_free = x;
{
if ( x->pfn == gpfn )
{
- x->spfn_and_flags = s;
+ x->smfn_and_flags = s;
goto done;
}
if ( head->pfn == 0 )
{
head->pfn = gpfn;
- head->spfn_and_flags = s;
+ head->smfn_and_flags = s;
ASSERT(head->next == NULL);
goto done;
}
/* Initialise the new node and insert directly after the head item. */
x->pfn = gpfn;
- x->spfn_and_flags = s;
+ x->smfn_and_flags = s;
x->next = head->next;
head->next = x;
}
#ifdef CONFIG_VMX
-#include <asm/domain_page.h>
static inline void vmx_update_shadow_state(
- struct exec_domain *ed, unsigned long gpfn, unsigned long spfn)
+ struct exec_domain *ed, unsigned long gpfn, unsigned long smfn)
{
l2_pgentry_t *mpl2e = 0;
map_domain_mem(pagetable_val(ed->arch.monitor_table));
mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] =
- mk_l2_pgentry((spfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
+ mk_l2_pgentry((smfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
__flush_tlb_one(SH_LINEAR_PT_VIRT_START);
- spl2e = (l2_pgentry_t *)map_domain_mem(spfn << PAGE_SHIFT);
+ spl2e = (l2_pgentry_t *)map_domain_mem(smfn << PAGE_SHIFT);
gpl2e = (l2_pgentry_t *)map_domain_mem(gpfn << PAGE_SHIFT);
memset(spl2e, 0, ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));
- ed->arch.shadow_table = mk_pagetable(spfn<<PAGE_SHIFT);
ed->arch.shadow_vtable = spl2e;
ed->arch.vpagetable = gpl2e; /* expect the guest did clean this up */
unmap_domain_mem(mpl2e);
}
+#endif /* CONFIG_VMX */
+
static inline void __shadow_mk_pagetable(struct exec_domain *ed)
{
struct domain *d = ed->domain;
unsigned long gpfn = pagetable_val(ed->arch.pagetable) >> PAGE_SHIFT;
- unsigned long spfn;
- SH_VLOG("0: __shadow_mk_pagetable(gpfn=%08lx\n", gpfn);
+ unsigned long smfn = __shadow_status(d, gpfn) & PSH_pfn_mask;
- if (d->arch.shadow_mode == SHM_full_32)
- {
- unsigned long guest_gpfn;
- guest_gpfn = machine_to_phys_mapping[gpfn];
-
- SH_VVLOG("__shadow_mk_pagetable(guest_gpfn=%08lx, gpfn=%08lx\n",
- guest_gpfn, gpfn);
-
- spfn = __shadow_status(d, guest_gpfn) & PSH_pfn_mask;
- if ( unlikely(spfn == 0) ) {
- spfn = shadow_l2_table(d, gpfn);
- ed->arch.shadow_table = mk_pagetable(spfn<<PAGE_SHIFT);
- } else {
- vmx_update_shadow_state(ed, gpfn, spfn);
- }
- } else {
- spfn = __shadow_status(d, gpfn) & PSH_pfn_mask;
+ SH_VVLOG("0: __shadow_mk_pagetable(gpfn=%08lx, smfn=%08lx)", gpfn, smfn);
- if ( unlikely(spfn == 0) ) {
- spfn = shadow_l2_table(d, gpfn);
- }
- ed->arch.shadow_table = mk_pagetable(spfn<<PAGE_SHIFT);
- }
-}
-#else
-static inline void __shadow_mk_pagetable(struct exec_domain *ed)
-{
- unsigned long gpfn = pagetable_val(ed->arch.pagetable) >> PAGE_SHIFT;
- unsigned long spfn = __shadow_status(ed->domain, gpfn);
-
- if ( unlikely(spfn == 0) )
- spfn = shadow_l2_table(ed->domain, gpfn);
+ if ( unlikely(smfn == 0) )
+ smfn = shadow_l2_table(d, gpfn);
+#ifdef CONFIG_VMX
+ else
+ if (d->arch.shadow_mode == SHM_full_32)
+ vmx_update_shadow_state(ed, gpfn, smfn);
+#endif
- ed->arch.shadow_table = mk_pagetable(spfn << PAGE_SHIFT);
+ ed->arch.shadow_table = mk_pagetable(smfn<<PAGE_SHIFT);
}
-#endif /* CONFIG_VMX */
static inline void shadow_mk_pagetable(struct exec_domain *ed)
{
- if ( unlikely(ed->domain->arch.shadow_mode) )
+ if ( unlikely(shadow_mode(ed->domain)) )
{
SH_VVLOG("shadow_mk_pagetable( gptbase=%08lx, mode=%d )",
pagetable_val(ed->arch.pagetable),
- ed->domain->arch.shadow_mode);
+ shadow_mode(ed->domain));
shadow_lock(ed->domain);
__shadow_mk_pagetable(ed);
SH_VVLOG("leaving shadow_mk_pagetable:\n"
"( gptbase=%08lx, mode=%d ) sh=%08lx",
pagetable_val(ed->arch.pagetable),
- ed->domain->arch.shadow_mode,
+ shadow_mode(ed->domain),
pagetable_val(ed->arch.shadow_table) );
}
}
#if SHADOW_DEBUG
-extern int check_pagetable(struct domain *d, pagetable_t pt, char *s);
+extern void check_pagetable(struct domain *d, pagetable_t pt, char *s);
#else
#define check_pagetable(d, pt, s) ((void)0)
#endif